ResNet34

這裡為ResNet34的訓練結果及過程,詳細的結果以及討論會在PDF報告中與其他Model呈現。

Library

In [ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import cv2
import glob
import time

#DATA
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import one_hot
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split

#CNN
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Convolution2D,MaxPooling2D,Flatten,Dense, AveragePooling2D,GlobalAveragePooling2D, Input
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten,Dropout, BatchNormalization
from keras.optimizers import Adam
from keras.losses import CategoricalCrossentropy
from keras.layers.merge import concatenate
from keras.models import Model
from mlxtend.plotting import plot_confusion_matrix
from sklearn.metrics import multilabel_confusion_matrix,confusion_matrix
from sklearn.metrics import accuracy_score
from keras.models import Model,load_model
#VIS
from keras.utils.vis_utils import plot_model
from tensorflow.keras.preprocessing import image
from tensorflow.python.keras.models import load_model #Tensorflow 2.0

Data Preprocessing

In [ ]:
def Data_process(path, random_state=None):
    '''
    Load the training images and split them into train/test subsets.

    Parameters:
        path (str): directory that contains one sub-directory per class
                    (named 'c0', 'c1', ..., 'c9'), each holding .jpg images.
        random_state (int | None): optional seed forwarded to
                    train_test_split for a reproducible split
                    (default None keeps the original random behaviour).

    Returns:
        (np.ndarray, np.ndarray, np.ndarray, np.ndarray):
        X_train, X_test, y_train, y_test — images resized to (150, 150, 3)
        in RGB order, labels taken from the directory name ('c3' -> 3).
    '''
    X = []
    y = []
    # Sort class directories by their base name so label order is deterministic
    # and independent of the OS path separator.
    for directory in sorted(glob.glob(os.path.join(path, '*')), key=os.path.basename):
        class_id = int(os.path.basename(directory).replace('c', ''))
        for img_path in glob.glob(os.path.join(directory, '*.jpg')):
            img_cv = cv2.imread(img_path)
            if img_cv is None:
                # cv2.imread returns None on unreadable/corrupt files;
                # skip instead of crashing in cvtColor.
                continue
            img_cv = cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR
            X.append(cv2.resize(img_cv, (150, 150)))
            y.append(class_id)

    # 80/20 hold-out split of the labelled training images.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=random_state)

    return np.array(X_train), np.array(X_test), np.array(y_train), np.array(y_test)
In [ ]:
# Dataset locations on Kaggle.
train_data_path = "/kaggle/input/state-farm-distracted-driver-detection/imgs/train/"
test_data_path =  "/kaggle/input/state-farm-distracted-driver-detection/imgs/test/"

# Build the train/test image arrays from the labelled training directory.
X_train, X_test , y_train, y_test= Data_process(train_data_path)

# Report the resulting array shapes.
for split_name, split_arr in (('X_train', X_train), ('y_train', y_train),
                              ('X_test', X_test), ('y_test', y_test)):
    print(split_name + ' shape:', split_arr.shape)
X_train shape: (17939, 150, 150, 3)
y_train shape: (17939,)
X_test shape: (4485, 150, 150, 3)
y_test shape: (4485,)
In [ ]:
# One-hot encode the integer class labels (10 driver-behaviour classes).
y_train_one = keras.utils.to_categorical(y_train,10)
y_test_one = keras.utils.to_categorical(y_test,10)

# Confirm the encoded label shapes alongside the image arrays.
for split_name, split_arr in (('X_train', X_train), ('y_train', y_train_one),
                              ('X_test', X_test), ('y_test', y_test_one)):
    print(split_name + ' shape:', split_arr.shape)
X_train shape: (17939, 150, 150, 3)
y_train shape: (17939, 10)
X_test shape: (4485, 150, 150, 3)
y_test shape: (4485, 10)

Data Observation

In [ ]:
# Summarise the label distribution of the split.
unique_labels = np.unique(y_train)
print("There are ",unique_labels,"different labels in this dataset.")
print('Labels count in y_train:', np.bincount(y_train))
print('Labels count in y_test:', np.bincount(y_test),'\n')

# Group the training images by class for visualisation.
label = {
    "mask_name": ["label " + str(i) for i in unique_labels],
    "data": [X_train[y_train == i, :] for i in unique_labels],
}

labels=['safe driving','texting - right','talking on the phone - right'
        ,'texting - left','talking on the phone - left','operating the radio',
        'drinking','reaching behind','hair and makeup','talking to passenger']
print('Showing some examples of different labels\n')

# One example image per class on a 2x5 grid.
fig = plt.figure(figsize = (20, 20))
for idx, class_id in enumerate(unique_labels):
    ax = fig.add_subplot(2, 5, idx + 1)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.imshow(label["data"][class_id][1])
    ax.set_title('Label '+str(class_id)+' '+str(labels[class_id]), fontsize = 18)
plt.tight_layout()
There are  [0 1 2 3 4 5 6 7 8 9] different labels in this dataset.
Labels count in y_train: [1979 1815 1860 1880 1853 1870 1858 1588 1539 1697]
Labels count in y_test: [510 452 457 466 473 442 467 414 372 432] 

Showing some examples of different labels

Building Model

ResNet34

In [ ]:
def Conv2d_BN(layer, nb_filter,kernel_size, padding='same',strides=(1,1)):
    """Apply a ReLU-activated Conv2D followed by channel-axis BatchNorm.

    NOTE(review): the activation comes before BatchNormalization here
    (conv -> relu -> bn), which differs from the canonical ResNet
    ordering (conv -> bn -> relu); kept as-is to preserve the trained
    architecture.
    """
    activated = Conv2D(nb_filter, kernel_size, padding=padding,
                       strides=strides, activation='relu')(layer)
    return BatchNormalization(axis=3)(activated)

def Residual_Block(input_model,nb_filter,kernel_size,strides=(1,1), shortcut =False):
    """Two stacked Conv2d_BN layers plus a residual connection.

    When `shortcut` is True a projection branch (Conv2d_BN with the same
    strides/filters) is added to match shapes; otherwise the identity
    input is added directly.
    """
    out = Conv2d_BN(input_model, nb_filter=nb_filter, kernel_size=kernel_size,
                    strides=strides, padding='same')
    out = Conv2d_BN(out, nb_filter=nb_filter, kernel_size=kernel_size, padding='same')

    if not shortcut:
        # Identity shortcut: shapes already match.
        return layers.add([out, input_model])

    # Projection shortcut: downsample/widen the input to match `out`.
    projected = Conv2d_BN(input_model, nb_filter=nb_filter,
                          strides=strides, kernel_size=kernel_size)
    return layers.add([out, projected])
    
def ResNet34(width, height, depth, classes):
    """Build a ResNet34-style functional model.

    Parameters:
        width, height, depth: input image dimensions (H x W x C).
        classes: number of softmax output units.

    Returns:
        keras Model mapping (width, height, depth) images to class
        probabilities.
    """
    Img = Input(shape=(width,height,depth))

    # Stem: 7x7/2 conv + 3x3/2 max-pool.
    net = Conv2d_BN(Img, 64, (7,7), strides=(2,2), padding='same')
    net = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(net)

    # Four stages with (filters, residual-block count) = ResNet34 layout.
    # Each stage after the first starts with a strided projection block.
    for stage, (filters, reps) in enumerate([(64, 3), (128, 4), (256, 6), (512, 3)]):
        for block in range(reps):
            if stage > 0 and block == 0:
                net = Residual_Block(net, nb_filter=filters, kernel_size=(3,3),
                                     strides=(2,2), shortcut=True)
            else:
                net = Residual_Block(net, nb_filter=filters, kernel_size=(3,3))

    # Classification head.
    net = GlobalAveragePooling2D()(net)
    net = Dense(classes, activation='softmax')(net)

    return Model(inputs=Img, outputs=net)
In [ ]:
# Instantiate ResNet34 for 150x150 RGB inputs and 10 output classes.
ResNet34_model = ResNet34(150,150,3,10)
ResNet34_model.summary()
# Render the architecture diagram (requires pydot/graphviz installed).
plot_model(ResNet34_model, show_shapes=True)
Model: "functional_1"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            [(None, 150, 150, 3) 0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 75, 75, 64)   9472        input_1[0][0]                    
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 75, 75, 64)   256         conv2d[0][0]                     
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 38, 38, 64)   0           batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 38, 38, 64)   36928       max_pooling2d[0][0]              
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 38, 38, 64)   256         conv2d_1[0][0]                   
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 38, 38, 64)   36928       batch_normalization_1[0][0]      
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 38, 38, 64)   256         conv2d_2[0][0]                   
__________________________________________________________________________________________________
add (Add)                       (None, 38, 38, 64)   0           batch_normalization_2[0][0]      
                                                                 max_pooling2d[0][0]              
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 38, 38, 64)   36928       add[0][0]                        
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 38, 38, 64)   256         conv2d_3[0][0]                   
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 38, 38, 64)   36928       batch_normalization_3[0][0]      
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 38, 38, 64)   256         conv2d_4[0][0]                   
__________________________________________________________________________________________________
add_1 (Add)                     (None, 38, 38, 64)   0           batch_normalization_4[0][0]      
                                                                 add[0][0]                        
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 38, 38, 64)   36928       add_1[0][0]                      
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 38, 38, 64)   256         conv2d_5[0][0]                   
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 38, 38, 64)   36928       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 38, 38, 64)   256         conv2d_6[0][0]                   
__________________________________________________________________________________________________
add_2 (Add)                     (None, 38, 38, 64)   0           batch_normalization_6[0][0]      
                                                                 add_1[0][0]                      
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 19, 19, 128)  73856       add_2[0][0]                      
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 19, 19, 128)  512         conv2d_7[0][0]                   
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 19, 19, 128)  147584      batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 19, 19, 128)  73856       add_2[0][0]                      
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 19, 19, 128)  512         conv2d_8[0][0]                   
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 19, 19, 128)  512         conv2d_9[0][0]                   
__________________________________________________________________________________________________
add_3 (Add)                     (None, 19, 19, 128)  0           batch_normalization_8[0][0]      
                                                                 batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 19, 19, 128)  147584      add_3[0][0]                      
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 19, 19, 128)  512         conv2d_10[0][0]                  
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 19, 19, 128)  147584      batch_normalization_10[0][0]     
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 19, 19, 128)  512         conv2d_11[0][0]                  
__________________________________________________________________________________________________
add_4 (Add)                     (None, 19, 19, 128)  0           batch_normalization_11[0][0]     
                                                                 add_3[0][0]                      
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 19, 19, 128)  147584      add_4[0][0]                      
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 19, 19, 128)  512         conv2d_12[0][0]                  
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 19, 19, 128)  147584      batch_normalization_12[0][0]     
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 19, 19, 128)  512         conv2d_13[0][0]                  
__________________________________________________________________________________________________
add_5 (Add)                     (None, 19, 19, 128)  0           batch_normalization_13[0][0]     
                                                                 add_4[0][0]                      
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 19, 19, 128)  147584      add_5[0][0]                      
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 19, 19, 128)  512         conv2d_14[0][0]                  
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 19, 19, 128)  147584      batch_normalization_14[0][0]     
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 19, 19, 128)  512         conv2d_15[0][0]                  
__________________________________________________________________________________________________
add_6 (Add)                     (None, 19, 19, 128)  0           batch_normalization_15[0][0]     
                                                                 add_5[0][0]                      
__________________________________________________________________________________________________
conv2d_16 (Conv2D)              (None, 10, 10, 256)  295168      add_6[0][0]                      
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 10, 10, 256)  1024        conv2d_16[0][0]                  
__________________________________________________________________________________________________
conv2d_17 (Conv2D)              (None, 10, 10, 256)  590080      batch_normalization_16[0][0]     
__________________________________________________________________________________________________
conv2d_18 (Conv2D)              (None, 10, 10, 256)  295168      add_6[0][0]                      
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 10, 10, 256)  1024        conv2d_17[0][0]                  
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 10, 10, 256)  1024        conv2d_18[0][0]                  
__________________________________________________________________________________________________
add_7 (Add)                     (None, 10, 10, 256)  0           batch_normalization_17[0][0]     
                                                                 batch_normalization_18[0][0]     
__________________________________________________________________________________________________
conv2d_19 (Conv2D)              (None, 10, 10, 256)  590080      add_7[0][0]                      
__________________________________________________________________________________________________
batch_normalization_19 (BatchNo (None, 10, 10, 256)  1024        conv2d_19[0][0]                  
__________________________________________________________________________________________________
conv2d_20 (Conv2D)              (None, 10, 10, 256)  590080      batch_normalization_19[0][0]     
__________________________________________________________________________________________________
batch_normalization_20 (BatchNo (None, 10, 10, 256)  1024        conv2d_20[0][0]                  
__________________________________________________________________________________________________
add_8 (Add)                     (None, 10, 10, 256)  0           batch_normalization_20[0][0]     
                                                                 add_7[0][0]                      
__________________________________________________________________________________________________
conv2d_21 (Conv2D)              (None, 10, 10, 256)  590080      add_8[0][0]                      
__________________________________________________________________________________________________
batch_normalization_21 (BatchNo (None, 10, 10, 256)  1024        conv2d_21[0][0]                  
__________________________________________________________________________________________________
conv2d_22 (Conv2D)              (None, 10, 10, 256)  590080      batch_normalization_21[0][0]     
__________________________________________________________________________________________________
batch_normalization_22 (BatchNo (None, 10, 10, 256)  1024        conv2d_22[0][0]                  
__________________________________________________________________________________________________
add_9 (Add)                     (None, 10, 10, 256)  0           batch_normalization_22[0][0]     
                                                                 add_8[0][0]                      
__________________________________________________________________________________________________
conv2d_23 (Conv2D)              (None, 10, 10, 256)  590080      add_9[0][0]                      
__________________________________________________________________________________________________
batch_normalization_23 (BatchNo (None, 10, 10, 256)  1024        conv2d_23[0][0]                  
__________________________________________________________________________________________________
conv2d_24 (Conv2D)              (None, 10, 10, 256)  590080      batch_normalization_23[0][0]     
__________________________________________________________________________________________________
batch_normalization_24 (BatchNo (None, 10, 10, 256)  1024        conv2d_24[0][0]                  
__________________________________________________________________________________________________
add_10 (Add)                    (None, 10, 10, 256)  0           batch_normalization_24[0][0]     
                                                                 add_9[0][0]                      
__________________________________________________________________________________________________
conv2d_25 (Conv2D)              (None, 10, 10, 256)  590080      add_10[0][0]                     
__________________________________________________________________________________________________
batch_normalization_25 (BatchNo (None, 10, 10, 256)  1024        conv2d_25[0][0]                  
__________________________________________________________________________________________________
conv2d_26 (Conv2D)              (None, 10, 10, 256)  590080      batch_normalization_25[0][0]     
__________________________________________________________________________________________________
batch_normalization_26 (BatchNo (None, 10, 10, 256)  1024        conv2d_26[0][0]                  
__________________________________________________________________________________________________
add_11 (Add)                    (None, 10, 10, 256)  0           batch_normalization_26[0][0]     
                                                                 add_10[0][0]                     
__________________________________________________________________________________________________
conv2d_27 (Conv2D)              (None, 10, 10, 256)  590080      add_11[0][0]                     
__________________________________________________________________________________________________
batch_normalization_27 (BatchNo (None, 10, 10, 256)  1024        conv2d_27[0][0]                  
__________________________________________________________________________________________________
conv2d_28 (Conv2D)              (None, 10, 10, 256)  590080      batch_normalization_27[0][0]     
__________________________________________________________________________________________________
batch_normalization_28 (BatchNo (None, 10, 10, 256)  1024        conv2d_28[0][0]                  
__________________________________________________________________________________________________
add_12 (Add)                    (None, 10, 10, 256)  0           batch_normalization_28[0][0]     
                                                                 add_11[0][0]                     
__________________________________________________________________________________________________
conv2d_29 (Conv2D)              (None, 5, 5, 512)    1180160     add_12[0][0]                     
__________________________________________________________________________________________________
batch_normalization_29 (BatchNo (None, 5, 5, 512)    2048        conv2d_29[0][0]                  
__________________________________________________________________________________________________
conv2d_30 (Conv2D)              (None, 5, 5, 512)    2359808     batch_normalization_29[0][0]     
__________________________________________________________________________________________________
conv2d_31 (Conv2D)              (None, 5, 5, 512)    1180160     add_12[0][0]                     
__________________________________________________________________________________________________
batch_normalization_30 (BatchNo (None, 5, 5, 512)    2048        conv2d_30[0][0]                  
__________________________________________________________________________________________________
batch_normalization_31 (BatchNo (None, 5, 5, 512)    2048        conv2d_31[0][0]                  
__________________________________________________________________________________________________
add_13 (Add)                    (None, 5, 5, 512)    0           batch_normalization_30[0][0]     
                                                                 batch_normalization_31[0][0]     
__________________________________________________________________________________________________
conv2d_32 (Conv2D)              (None, 5, 5, 512)    2359808     add_13[0][0]                     
__________________________________________________________________________________________________
batch_normalization_32 (BatchNo (None, 5, 5, 512)    2048        conv2d_32[0][0]                  
__________________________________________________________________________________________________
conv2d_33 (Conv2D)              (None, 5, 5, 512)    2359808     batch_normalization_32[0][0]     
__________________________________________________________________________________________________
batch_normalization_33 (BatchNo (None, 5, 5, 512)    2048        conv2d_33[0][0]                  
__________________________________________________________________________________________________
add_14 (Add)                    (None, 5, 5, 512)    0           batch_normalization_33[0][0]     
                                                                 add_13[0][0]                     
__________________________________________________________________________________________________
conv2d_34 (Conv2D)              (None, 5, 5, 512)    2359808     add_14[0][0]                     
__________________________________________________________________________________________________
batch_normalization_34 (BatchNo (None, 5, 5, 512)    2048        conv2d_34[0][0]                  
__________________________________________________________________________________________________
conv2d_35 (Conv2D)              (None, 5, 5, 512)    2359808     batch_normalization_34[0][0]     
__________________________________________________________________________________________________
batch_normalization_35 (BatchNo (None, 5, 5, 512)    2048        conv2d_35[0][0]                  
__________________________________________________________________________________________________
add_15 (Add)                    (None, 5, 5, 512)    0           batch_normalization_35[0][0]     
                                                                 add_14[0][0]                     
__________________________________________________________________________________________________
global_average_pooling2d (Globa (None, 512)          0           add_15[0][0]                     
__________________________________________________________________________________________________
dense (Dense)                   (None, 10)           5130        global_average_pooling2d[0][0]   
==================================================================================================
Total params: 22,691,594
Trainable params: 22,674,570
Non-trainable params: 17,024
__________________________________________________________________________________________________
Out[ ]:

Training ResNet34

In [ ]:
from keras.callbacks import ModelCheckpoint

# Grid search over learning rate x batch size; each run trains a fresh
# ResNet34 and checkpoints its best weights to ResNet34_best_weight<k>.hdf5.
base_filename='ResNet34_best_weight'
filename_suffix = 'hdf5'
k=1  # run counter, used to number the checkpoint files
learning_rate=[0.0001, 0.0005, 0.001]
batch_size=[16, 32, 48]

for lr_value in learning_rate:
    for bs_value in batch_size:
        print("Learning rate:", lr_value ,"& Batch size:", bs_value)
        # Re-create the model so every configuration starts from scratch.
        ResNet34_model = ResNet34(150,150,3,10)
        # `learning_rate=` replaces the deprecated `lr=` keyword.
        ResNet34_model.compile(
            optimizer=Adam(learning_rate=lr_value, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
            loss='categorical_crossentropy',
            metrics=['accuracy'])
        tStart = time.time()  # start timing this run

        # Save the best (lowest val_loss) weights; stop after 3 stagnant epochs.
        filepath = f"{base_filename}{k}.{filename_suffix}"
        early_stop  = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
        checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
                                     save_best_only=True, mode='min')
        hist_ResNet34=ResNet34_model.fit(X_train, y_train_one, batch_size=bs_value,
                                         epochs=10, validation_split=0.2,
                                         callbacks=[early_stop, checkpoint], verbose=1)
        tEnd = time.time()  # stop timing
        k+=1
        print("Training Time :",tEnd-tStart,"\n")

        # NOTE(review): this evaluates the LAST-epoch weights, not the
        # checkpointed best ones — reload `filepath` first if the best
        # model should be scored.
        scores = ResNet34_model.evaluate(X_test,y_test_one,verbose=1)

        # Plot accuracy and loss curves for this configuration.
        acc = hist_ResNet34.history['accuracy']
        val_acc = hist_ResNet34.history['val_accuracy']
        loss = hist_ResNet34.history['loss']
        val_loss = hist_ResNet34.history['val_loss']
        epochs = range(1, len(acc) + 1)
        plt.subplot(1, 2, 1)
        plt.plot(epochs, acc, label='Training acc')
        plt.plot(epochs, val_acc, label='Validation acc')
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.subplot(1, 2, 2)
        plt.plot(epochs, loss, label='Training loss')
        plt.plot(epochs, val_loss, label='Validation loss')
        plt.title('Training and validation loss')
        plt.legend()
        plt.tight_layout()
        plt.show()
        
Learning rate: 0.0001 & Batch size: 16
Epoch 1/10
897/897 [==============================] - ETA: 0s - loss: 0.5113 - accuracy: 0.8385
Epoch 00001: val_loss improved from inf to 0.13223, saving model to ResNet34_best_weight1.hdf5
897/897 [==============================] - 43s 48ms/step - loss: 0.5113 - accuracy: 0.8385 - val_loss: 0.1322 - val_accuracy: 0.9574
Epoch 2/10
896/897 [============================>.] - ETA: 0s - loss: 0.0966 - accuracy: 0.9699
Epoch 00002: val_loss improved from 0.13223 to 0.09200, saving model to ResNet34_best_weight1.hdf5
897/897 [==============================] - 42s 47ms/step - loss: 0.0965 - accuracy: 0.9700 - val_loss: 0.0920 - val_accuracy: 0.9727
Epoch 3/10
897/897 [==============================] - ETA: 0s - loss: 0.0665 - accuracy: 0.9790
Epoch 00003: val_loss did not improve from 0.09200
897/897 [==============================] - 41s 46ms/step - loss: 0.0665 - accuracy: 0.9790 - val_loss: 0.2004 - val_accuracy: 0.9429
Epoch 4/10
896/897 [============================>.] - ETA: 0s - loss: 0.0519 - accuracy: 0.9852
Epoch 00004: val_loss improved from 0.09200 to 0.09154, saving model to ResNet34_best_weight1.hdf5
897/897 [==============================] - 43s 48ms/step - loss: 0.0518 - accuracy: 0.9852 - val_loss: 0.0915 - val_accuracy: 0.9771
Epoch 5/10
896/897 [============================>.] - ETA: 0s - loss: 0.0527 - accuracy: 0.9844
Epoch 00005: val_loss improved from 0.09154 to 0.05037, saving model to ResNet34_best_weight1.hdf5
897/897 [==============================] - 43s 48ms/step - loss: 0.0527 - accuracy: 0.9844 - val_loss: 0.0504 - val_accuracy: 0.9841
Epoch 6/10
897/897 [==============================] - ETA: 0s - loss: 0.0385 - accuracy: 0.9885
Epoch 00006: val_loss improved from 0.05037 to 0.04801, saving model to ResNet34_best_weight1.hdf5
897/897 [==============================] - 43s 48ms/step - loss: 0.0385 - accuracy: 0.9885 - val_loss: 0.0480 - val_accuracy: 0.9900
Epoch 7/10
896/897 [============================>.] - ETA: 0s - loss: 0.0391 - accuracy: 0.9877
Epoch 00007: val_loss did not improve from 0.04801
897/897 [==============================] - 41s 46ms/step - loss: 0.0391 - accuracy: 0.9877 - val_loss: 0.1744 - val_accuracy: 0.9546
Epoch 8/10
896/897 [============================>.] - ETA: 0s - loss: 0.0364 - accuracy: 0.9895
Epoch 00008: val_loss did not improve from 0.04801
897/897 [==============================] - 41s 46ms/step - loss: 0.0364 - accuracy: 0.9895 - val_loss: 0.1321 - val_accuracy: 0.9699
Epoch 9/10
897/897 [==============================] - ETA: 0s - loss: 0.0323 - accuracy: 0.9916
Epoch 00009: val_loss improved from 0.04801 to 0.03785, saving model to ResNet34_best_weight1.hdf5
897/897 [==============================] - 42s 47ms/step - loss: 0.0323 - accuracy: 0.9916 - val_loss: 0.0378 - val_accuracy: 0.9897
Epoch 10/10
896/897 [============================>.] - ETA: 0s - loss: 0.0293 - accuracy: 0.9918
Epoch 00010: val_loss did not improve from 0.03785
897/897 [==============================] - 42s 46ms/step - loss: 0.0293 - accuracy: 0.9918 - val_loss: 0.0723 - val_accuracy: 0.9824
Training Time : 430.4006404876709 

141/141 [==============================] - 3s 21ms/step - loss: 0.0594 - accuracy: 0.9844
Learning rate: 0.0001 & Batch size: 32
Epoch 1/10
448/449 [============================>.] - ETA: 0s - loss: 0.5007 - accuracy: 0.8433
Epoch 00001: val_loss improved from inf to 0.26135, saving model to ResNet34_best_weight2.hdf5
449/449 [==============================] - 33s 73ms/step - loss: 0.5002 - accuracy: 0.8434 - val_loss: 0.2614 - val_accuracy: 0.9175
Epoch 2/10
448/449 [============================>.] - ETA: 0s - loss: 0.0811 - accuracy: 0.9743
Epoch 00002: val_loss improved from 0.26135 to 0.18260, saving model to ResNet34_best_weight2.hdf5
449/449 [==============================] - 32s 71ms/step - loss: 0.0814 - accuracy: 0.9742 - val_loss: 0.1826 - val_accuracy: 0.9482
Epoch 3/10
449/449 [==============================] - ETA: 0s - loss: 0.0462 - accuracy: 0.9860
Epoch 00003: val_loss improved from 0.18260 to 0.11014, saving model to ResNet34_best_weight2.hdf5
449/449 [==============================] - 32s 71ms/step - loss: 0.0462 - accuracy: 0.9860 - val_loss: 0.1101 - val_accuracy: 0.9693
Epoch 4/10
448/449 [============================>.] - ETA: 0s - loss: 0.0301 - accuracy: 0.9921
Epoch 00004: val_loss improved from 0.11014 to 0.04230, saving model to ResNet34_best_weight2.hdf5
449/449 [==============================] - 32s 71ms/step - loss: 0.0301 - accuracy: 0.9921 - val_loss: 0.0423 - val_accuracy: 0.9883
Epoch 5/10
448/449 [============================>.] - ETA: 0s - loss: 0.0393 - accuracy: 0.9886
Epoch 00005: val_loss did not improve from 0.04230
449/449 [==============================] - 31s 69ms/step - loss: 0.0393 - accuracy: 0.9886 - val_loss: 0.1306 - val_accuracy: 0.9654
Epoch 6/10
448/449 [============================>.] - ETA: 0s - loss: 0.0365 - accuracy: 0.9888
Epoch 00006: val_loss did not improve from 0.04230
449/449 [==============================] - 31s 68ms/step - loss: 0.0365 - accuracy: 0.9888 - val_loss: 0.0568 - val_accuracy: 0.9841
Epoch 7/10
448/449 [============================>.] - ETA: 0s - loss: 0.0244 - accuracy: 0.9943
Epoch 00007: val_loss improved from 0.04230 to 0.04179, saving model to ResNet34_best_weight2.hdf5
449/449 [==============================] - 32s 71ms/step - loss: 0.0243 - accuracy: 0.9943 - val_loss: 0.0418 - val_accuracy: 0.9866
Epoch 8/10
448/449 [============================>.] - ETA: 0s - loss: 0.0114 - accuracy: 0.9969
Epoch 00008: val_loss did not improve from 0.04179
449/449 [==============================] - 31s 68ms/step - loss: 0.0114 - accuracy: 0.9969 - val_loss: 0.0603 - val_accuracy: 0.9841
Epoch 9/10
448/449 [============================>.] - ETA: 0s - loss: 0.0335 - accuracy: 0.9902
Epoch 00009: val_loss did not improve from 0.04179
449/449 [==============================] - 31s 69ms/step - loss: 0.0335 - accuracy: 0.9902 - val_loss: 0.1327 - val_accuracy: 0.9615
Epoch 10/10
448/449 [============================>.] - ETA: 0s - loss: 0.0213 - accuracy: 0.9947
Epoch 00010: val_loss did not improve from 0.04179
449/449 [==============================] - 31s 69ms/step - loss: 0.0216 - accuracy: 0.9946 - val_loss: 0.0846 - val_accuracy: 0.9785
Training Time : 319.35647225379944 

141/141 [==============================] - 3s 21ms/step - loss: 0.0805 - accuracy: 0.9777
Learning rate: 0.0001 & Batch size: 48
Epoch 1/10
299/299 [==============================] - ETA: 0s - loss: 0.5434 - accuracy: 0.8327
Epoch 00001: val_loss improved from inf to 0.55233, saving model to ResNet34_best_weight3.hdf5
299/299 [==============================] - 30s 100ms/step - loss: 0.5434 - accuracy: 0.8327 - val_loss: 0.5523 - val_accuracy: 0.8258
Epoch 2/10
299/299 [==============================] - ETA: 0s - loss: 0.0564 - accuracy: 0.9825
Epoch 00002: val_loss improved from 0.55233 to 0.16487, saving model to ResNet34_best_weight3.hdf5
299/299 [==============================] - 29s 97ms/step - loss: 0.0564 - accuracy: 0.9825 - val_loss: 0.1649 - val_accuracy: 0.9501
Epoch 3/10
299/299 [==============================] - ETA: 0s - loss: 0.0547 - accuracy: 0.9822
Epoch 00003: val_loss did not improve from 0.16487
299/299 [==============================] - 28s 94ms/step - loss: 0.0547 - accuracy: 0.9822 - val_loss: 0.1740 - val_accuracy: 0.9529
Epoch 4/10
299/299 [==============================] - ETA: 0s - loss: 0.0460 - accuracy: 0.9854
Epoch 00004: val_loss improved from 0.16487 to 0.08496, saving model to ResNet34_best_weight3.hdf5
299/299 [==============================] - 29s 96ms/step - loss: 0.0460 - accuracy: 0.9854 - val_loss: 0.0850 - val_accuracy: 0.9752
Epoch 5/10
299/299 [==============================] - ETA: 0s - loss: 0.0194 - accuracy: 0.9945
Epoch 00005: val_loss improved from 0.08496 to 0.05587, saving model to ResNet34_best_weight3.hdf5
299/299 [==============================] - 29s 96ms/step - loss: 0.0194 - accuracy: 0.9945 - val_loss: 0.0559 - val_accuracy: 0.9838
Epoch 6/10
299/299 [==============================] - ETA: 0s - loss: 0.0195 - accuracy: 0.9941
Epoch 00006: val_loss did not improve from 0.05587
299/299 [==============================] - 28s 92ms/step - loss: 0.0195 - accuracy: 0.9941 - val_loss: 0.0709 - val_accuracy: 0.9777
Epoch 7/10
299/299 [==============================] - ETA: 0s - loss: 0.0220 - accuracy: 0.9932
Epoch 00007: val_loss did not improve from 0.05587
299/299 [==============================] - 28s 93ms/step - loss: 0.0220 - accuracy: 0.9932 - val_loss: 0.1273 - val_accuracy: 0.9699
Epoch 8/10
299/299 [==============================] - ETA: 0s - loss: 0.0293 - accuracy: 0.9918
Epoch 00008: val_loss did not improve from 0.05587
299/299 [==============================] - 28s 92ms/step - loss: 0.0293 - accuracy: 0.9918 - val_loss: 0.0880 - val_accuracy: 0.9766
Training Time : 233.58132910728455 

141/141 [==============================] - 3s 21ms/step - loss: 0.0962 - accuracy: 0.9770
Learning rate: 0.0005 & Batch size: 16
Epoch 1/10
897/897 [==============================] - ETA: 0s - loss: 0.4906 - accuracy: 0.8480
Epoch 00001: val_loss improved from inf to 0.19944, saving model to ResNet34_best_weight4.hdf5
897/897 [==============================] - 43s 48ms/step - loss: 0.4906 - accuracy: 0.8480 - val_loss: 0.1994 - val_accuracy: 0.9409
Epoch 2/10
897/897 [==============================] - ETA: 0s - loss: 0.1192 - accuracy: 0.9653
Epoch 00002: val_loss did not improve from 0.19944
897/897 [==============================] - 42s 47ms/step - loss: 0.1192 - accuracy: 0.9653 - val_loss: 0.2164 - val_accuracy: 0.9328
Epoch 3/10
897/897 [==============================] - ETA: 0s - loss: 0.0827 - accuracy: 0.9746
Epoch 00003: val_loss did not improve from 0.19944
897/897 [==============================] - 41s 46ms/step - loss: 0.0827 - accuracy: 0.9746 - val_loss: 0.2615 - val_accuracy: 0.9225
Epoch 4/10
897/897 [==============================] - ETA: 0s - loss: 0.0723 - accuracy: 0.9788
Epoch 00004: val_loss improved from 0.19944 to 0.08363, saving model to ResNet34_best_weight4.hdf5
897/897 [==============================] - 43s 48ms/step - loss: 0.0723 - accuracy: 0.9788 - val_loss: 0.0836 - val_accuracy: 0.9735
Epoch 5/10
897/897 [==============================] - ETA: 0s - loss: 0.0618 - accuracy: 0.9829
Epoch 00005: val_loss did not improve from 0.08363
897/897 [==============================] - 42s 47ms/step - loss: 0.0618 - accuracy: 0.9829 - val_loss: 0.1459 - val_accuracy: 0.9571
Epoch 6/10
896/897 [============================>.] - ETA: 0s - loss: 0.0546 - accuracy: 0.9849
Epoch 00006: val_loss improved from 0.08363 to 0.07446, saving model to ResNet34_best_weight4.hdf5
897/897 [==============================] - 43s 48ms/step - loss: 0.0546 - accuracy: 0.9848 - val_loss: 0.0745 - val_accuracy: 0.9760
Epoch 7/10
897/897 [==============================] - ETA: 0s - loss: 0.0534 - accuracy: 0.9847
Epoch 00007: val_loss did not improve from 0.07446
897/897 [==============================] - 42s 46ms/step - loss: 0.0534 - accuracy: 0.9847 - val_loss: 0.1095 - val_accuracy: 0.9666
Epoch 8/10
897/897 [==============================] - ETA: 0s - loss: 0.0462 - accuracy: 0.9877
Epoch 00008: val_loss improved from 0.07446 to 0.05256, saving model to ResNet34_best_weight4.hdf5
897/897 [==============================] - 43s 48ms/step - loss: 0.0462 - accuracy: 0.9877 - val_loss: 0.0526 - val_accuracy: 0.9875
Epoch 9/10
896/897 [============================>.] - ETA: 0s - loss: 0.0345 - accuracy: 0.9900
Epoch 00009: val_loss did not improve from 0.05256
897/897 [==============================] - 42s 47ms/step - loss: 0.0356 - accuracy: 0.9898 - val_loss: 0.1110 - val_accuracy: 0.9707
Epoch 10/10
897/897 [==============================] - ETA: 0s - loss: 0.0319 - accuracy: 0.9913
Epoch 00010: val_loss did not improve from 0.05256
897/897 [==============================] - 41s 46ms/step - loss: 0.0319 - accuracy: 0.9913 - val_loss: 0.0563 - val_accuracy: 0.9852
Training Time : 427.74996066093445 

141/141 [==============================] - 3s 21ms/step - loss: 0.0502 - accuracy: 0.9866
Learning rate: 0.0005 & Batch size: 32
Epoch 1/10
448/449 [============================>.] - ETA: 0s - loss: 0.4629 - accuracy: 0.8588
Epoch 00001: val_loss improved from inf to 0.28400, saving model to ResNet34_best_weight5.hdf5
449/449 [==============================] - 32s 72ms/step - loss: 0.4629 - accuracy: 0.8588 - val_loss: 0.2840 - val_accuracy: 0.9192
Epoch 2/10
448/449 [============================>.] - ETA: 0s - loss: 0.0847 - accuracy: 0.9755
Epoch 00002: val_loss did not improve from 0.28400
449/449 [==============================] - 31s 69ms/step - loss: 0.0849 - accuracy: 0.9755 - val_loss: 0.3740 - val_accuracy: 0.9033
Epoch 3/10
448/449 [============================>.] - ETA: 0s - loss: 0.0548 - accuracy: 0.9842
Epoch 00003: val_loss did not improve from 0.28400
449/449 [==============================] - 31s 68ms/step - loss: 0.0548 - accuracy: 0.9842 - val_loss: 0.3008 - val_accuracy: 0.9066
Epoch 4/10
448/449 [============================>.] - ETA: 0s - loss: 0.0408 - accuracy: 0.9887
Epoch 00004: val_loss improved from 0.28400 to 0.13925, saving model to ResNet34_best_weight5.hdf5
449/449 [==============================] - 32s 71ms/step - loss: 0.0411 - accuracy: 0.9886 - val_loss: 0.1392 - val_accuracy: 0.9640
Epoch 5/10
448/449 [============================>.] - ETA: 0s - loss: 0.0470 - accuracy: 0.9855
Epoch 00005: val_loss improved from 0.13925 to 0.04222, saving model to ResNet34_best_weight5.hdf5
449/449 [==============================] - 32s 70ms/step - loss: 0.0470 - accuracy: 0.9855 - val_loss: 0.0422 - val_accuracy: 0.9902
Epoch 6/10
448/449 [============================>.] - ETA: 0s - loss: 0.0342 - accuracy: 0.9900
Epoch 00006: val_loss did not improve from 0.04222
449/449 [==============================] - 31s 69ms/step - loss: 0.0342 - accuracy: 0.9900 - val_loss: 0.1671 - val_accuracy: 0.9504
Epoch 7/10
448/449 [============================>.] - ETA: 0s - loss: 0.0275 - accuracy: 0.9928
Epoch 00007: val_loss did not improve from 0.04222
449/449 [==============================] - 31s 68ms/step - loss: 0.0277 - accuracy: 0.9928 - val_loss: 0.0495 - val_accuracy: 0.9858
Epoch 8/10
448/449 [============================>.] - ETA: 0s - loss: 0.0462 - accuracy: 0.9865
Epoch 00008: val_loss did not improve from 0.04222
449/449 [==============================] - 31s 69ms/step - loss: 0.0462 - accuracy: 0.9865 - val_loss: 0.1365 - val_accuracy: 0.9640
Training Time : 255.14576363563538 

141/141 [==============================] - 3s 21ms/step - loss: 0.1383 - accuracy: 0.9592
Learning rate: 0.0005 & Batch size: 48
Epoch 1/10
299/299 [==============================] - ETA: 0s - loss: 0.4909 - accuracy: 0.8551
Epoch 00001: val_loss improved from inf to 0.40423, saving model to ResNet34_best_weight6.hdf5
299/299 [==============================] - 29s 98ms/step - loss: 0.4909 - accuracy: 0.8551 - val_loss: 0.4042 - val_accuracy: 0.8910
Epoch 2/10
299/299 [==============================] - ETA: 0s - loss: 0.0793 - accuracy: 0.9771
Epoch 00002: val_loss improved from 0.40423 to 0.13215, saving model to ResNet34_best_weight6.hdf5
299/299 [==============================] - 29s 97ms/step - loss: 0.0793 - accuracy: 0.9771 - val_loss: 0.1322 - val_accuracy: 0.9601
Epoch 3/10
299/299 [==============================] - ETA: 0s - loss: 0.0436 - accuracy: 0.9885
Epoch 00003: val_loss did not improve from 0.13215
299/299 [==============================] - 28s 92ms/step - loss: 0.0436 - accuracy: 0.9885 - val_loss: 0.1775 - val_accuracy: 0.9451
Epoch 4/10
299/299 [==============================] - ETA: 0s - loss: 0.0468 - accuracy: 0.9853
Epoch 00004: val_loss improved from 0.13215 to 0.07552, saving model to ResNet34_best_weight6.hdf5
299/299 [==============================] - 29s 97ms/step - loss: 0.0468 - accuracy: 0.9853 - val_loss: 0.0755 - val_accuracy: 0.9797
Epoch 5/10
299/299 [==============================] - ETA: 0s - loss: 0.0249 - accuracy: 0.9937
Epoch 00005: val_loss did not improve from 0.07552
299/299 [==============================] - 28s 92ms/step - loss: 0.0249 - accuracy: 0.9937 - val_loss: 0.3687 - val_accuracy: 0.8868
Epoch 6/10
299/299 [==============================] - ETA: 0s - loss: 0.0196 - accuracy: 0.9944
Epoch 00006: val_loss did not improve from 0.07552
299/299 [==============================] - 28s 93ms/step - loss: 0.0196 - accuracy: 0.9944 - val_loss: 0.1037 - val_accuracy: 0.9721
Epoch 7/10
299/299 [==============================] - ETA: 0s - loss: 0.0395 - accuracy: 0.9889
Epoch 00007: val_loss improved from 0.07552 to 0.06603, saving model to ResNet34_best_weight6.hdf5
299/299 [==============================] - 29s 95ms/step - loss: 0.0395 - accuracy: 0.9889 - val_loss: 0.0660 - val_accuracy: 0.9810
Epoch 8/10
299/299 [==============================] - ETA: 0s - loss: 0.0103 - accuracy: 0.9976
Epoch 00008: val_loss improved from 0.06603 to 0.03091, saving model to ResNet34_best_weight6.hdf5
299/299 [==============================] - 29s 96ms/step - loss: 0.0103 - accuracy: 0.9976 - val_loss: 0.0309 - val_accuracy: 0.9914
Epoch 9/10
299/299 [==============================] - ETA: 0s - loss: 0.0061 - accuracy: 0.9986
Epoch 00009: val_loss did not improve from 0.03091
299/299 [==============================] - 28s 92ms/step - loss: 0.0061 - accuracy: 0.9986 - val_loss: 0.0666 - val_accuracy: 0.9816
Epoch 10/10
299/299 [==============================] - ETA: 0s - loss: 0.0462 - accuracy: 0.9859
Epoch 00010: val_loss did not improve from 0.03091
299/299 [==============================] - 28s 93ms/step - loss: 0.0462 - accuracy: 0.9859 - val_loss: 0.1873 - val_accuracy: 0.9392
Training Time : 287.8884177207947 

141/141 [==============================] - 3s 21ms/step - loss: 0.1901 - accuracy: 0.9400
Learning rate: 0.001 & Batch size: 16
Epoch 1/10
896/897 [============================>.] - ETA: 0s - loss: 0.6680 - accuracy: 0.7942
Epoch 00001: val_loss improved from inf to 0.40057, saving model to ResNet34_best_weight7.hdf5
897/897 [==============================] - 43s 48ms/step - loss: 0.6677 - accuracy: 0.7942 - val_loss: 0.4006 - val_accuracy: 0.8793
Epoch 2/10
896/897 [============================>.] - ETA: 0s - loss: 0.1417 - accuracy: 0.9576
Epoch 00002: val_loss did not improve from 0.40057
897/897 [==============================] - 41s 46ms/step - loss: 0.1415 - accuracy: 0.9576 - val_loss: 1.4157 - val_accuracy: 0.6399
Epoch 3/10
896/897 [============================>.] - ETA: 0s - loss: 0.0954 - accuracy: 0.9714
Epoch 00003: val_loss improved from 0.40057 to 0.08858, saving model to ResNet34_best_weight7.hdf5
897/897 [==============================] - 42s 47ms/step - loss: 0.0957 - accuracy: 0.9714 - val_loss: 0.0886 - val_accuracy: 0.9738
Epoch 4/10
896/897 [============================>.] - ETA: 0s - loss: 0.0729 - accuracy: 0.9771
Epoch 00004: val_loss did not improve from 0.08858
897/897 [==============================] - 41s 46ms/step - loss: 0.0729 - accuracy: 0.9771 - val_loss: 0.1739 - val_accuracy: 0.9535
Epoch 5/10
897/897 [==============================] - ETA: 0s - loss: 0.0704 - accuracy: 0.9790
Epoch 00005: val_loss improved from 0.08858 to 0.07773, saving model to ResNet34_best_weight7.hdf5
897/897 [==============================] - 42s 47ms/step - loss: 0.0704 - accuracy: 0.9790 - val_loss: 0.0777 - val_accuracy: 0.9766
Epoch 6/10
897/897 [==============================] - ETA: 0s - loss: 0.0466 - accuracy: 0.9859
Epoch 00006: val_loss improved from 0.07773 to 0.04772, saving model to ResNet34_best_weight7.hdf5
897/897 [==============================] - 42s 47ms/step - loss: 0.0466 - accuracy: 0.9859 - val_loss: 0.0477 - val_accuracy: 0.9883
Epoch 7/10
897/897 [==============================] - ETA: 0s - loss: 0.0553 - accuracy: 0.9846
Epoch 00007: val_loss did not improve from 0.04772
897/897 [==============================] - 41s 46ms/step - loss: 0.0553 - accuracy: 0.9846 - val_loss: 0.3567 - val_accuracy: 0.9100
Epoch 8/10
897/897 [==============================] - ETA: 0s - loss: 0.0446 - accuracy: 0.9888
Epoch 00008: val_loss did not improve from 0.04772
897/897 [==============================] - 42s 46ms/step - loss: 0.0446 - accuracy: 0.9888 - val_loss: 0.0595 - val_accuracy: 0.9794
Epoch 9/10
896/897 [============================>.] - ETA: 0s - loss: 0.0305 - accuracy: 0.9917
Epoch 00009: val_loss did not improve from 0.04772
897/897 [==============================] - 42s 47ms/step - loss: 0.0305 - accuracy: 0.9916 - val_loss: 0.0971 - val_accuracy: 0.9705
Training Time : 382.1436674594879 

141/141 [==============================] - 3s 21ms/step - loss: 0.1076 - accuracy: 0.9654
Learning rate: 0.001 & Batch size: 32
Epoch 1/10
448/449 [============================>.] - ETA: 0s - loss: 0.6163 - accuracy: 0.8191
Epoch 00001: val_loss improved from inf to 1.15266, saving model to ResNet34_best_weight8.hdf5
449/449 [==============================] - 32s 72ms/step - loss: 0.6159 - accuracy: 0.8190 - val_loss: 1.1527 - val_accuracy: 0.7216
Epoch 2/10
448/449 [============================>.] - ETA: 0s - loss: 0.1036 - accuracy: 0.9683
Epoch 00002: val_loss improved from 1.15266 to 0.26626, saving model to ResNet34_best_weight8.hdf5
449/449 [==============================] - 32s 70ms/step - loss: 0.1036 - accuracy: 0.9684 - val_loss: 0.2663 - val_accuracy: 0.9284
Epoch 3/10
448/449 [============================>.] - ETA: 0s - loss: 0.0702 - accuracy: 0.9791
Epoch 00003: val_loss improved from 0.26626 to 0.24946, saving model to ResNet34_best_weight8.hdf5
449/449 [==============================] - 32s 70ms/step - loss: 0.0702 - accuracy: 0.9791 - val_loss: 0.2495 - val_accuracy: 0.9264
Epoch 4/10
448/449 [============================>.] - ETA: 0s - loss: 0.0458 - accuracy: 0.9863
Epoch 00004: val_loss improved from 0.24946 to 0.13730, saving model to ResNet34_best_weight8.hdf5
449/449 [==============================] - 31s 70ms/step - loss: 0.0460 - accuracy: 0.9862 - val_loss: 0.1373 - val_accuracy: 0.9574
Epoch 5/10
448/449 [============================>.] - ETA: 0s - loss: 0.0462 - accuracy: 0.9875
Epoch 00005: val_loss improved from 0.13730 to 0.07356, saving model to ResNet34_best_weight8.hdf5
449/449 [==============================] - 32s 71ms/step - loss: 0.0461 - accuracy: 0.9875 - val_loss: 0.0736 - val_accuracy: 0.9794
Epoch 6/10
448/449 [============================>.] - ETA: 0s - loss: 0.0427 - accuracy: 0.9875
Epoch 00006: val_loss did not improve from 0.07356
449/449 [==============================] - 31s 68ms/step - loss: 0.0427 - accuracy: 0.9875 - val_loss: 0.1045 - val_accuracy: 0.9682
Epoch 7/10
448/449 [============================>.] - ETA: 0s - loss: 0.0324 - accuracy: 0.9911
Epoch 00007: val_loss did not improve from 0.07356
449/449 [==============================] - 31s 69ms/step - loss: 0.0323 - accuracy: 0.9912 - val_loss: 0.0738 - val_accuracy: 0.9760
Epoch 8/10
448/449 [============================>.] - ETA: 0s - loss: 0.0348 - accuracy: 0.9899
Epoch 00008: val_loss did not improve from 0.07356
449/449 [==============================] - 31s 68ms/step - loss: 0.0351 - accuracy: 0.9898 - val_loss: 6.2853 - val_accuracy: 0.2804
Training Time : 255.49612045288086 

141/141 [==============================] - 3s 20ms/step - loss: 6.1295 - accuracy: 0.2807
Learning rate: 0.001 & Batch size: 48
Epoch 1/10
299/299 [==============================] - ETA: 0s - loss: 0.6800 - accuracy: 0.8070
Epoch 00001: val_loss improved from inf to 0.32662, saving model to ResNet34_best_weight9.hdf5
299/299 [==============================] - 30s 100ms/step - loss: 0.6800 - accuracy: 0.8070 - val_loss: 0.3266 - val_accuracy: 0.8955
Epoch 2/10
299/299 [==============================] - ETA: 0s - loss: 0.0937 - accuracy: 0.9709
Epoch 00002: val_loss did not improve from 0.32662
299/299 [==============================] - 28s 93ms/step - loss: 0.0937 - accuracy: 0.9709 - val_loss: 0.9448 - val_accuracy: 0.8002
Epoch 3/10
299/299 [==============================] - ETA: 0s - loss: 0.0594 - accuracy: 0.9829
Epoch 00003: val_loss did not improve from 0.32662
299/299 [==============================] - 28s 93ms/step - loss: 0.0594 - accuracy: 0.9829 - val_loss: 0.3300 - val_accuracy: 0.9027
Epoch 4/10
299/299 [==============================] - ETA: 0s - loss: 0.0394 - accuracy: 0.9889
Epoch 00004: val_loss did not improve from 0.32662
299/299 [==============================] - 28s 93ms/step - loss: 0.0394 - accuracy: 0.9889 - val_loss: 0.7898 - val_accuracy: 0.8205
Training Time : 118.26880311965942 

141/141 [==============================] - 3s 21ms/step - loss: 0.7794 - accuracy: 0.8129

Confusion Matrix

In [ ]:
# Load the checkpoint with the best validation loss and evaluate it on the
# held-out test set via accuracy and a confusion matrix.
best_model = tf.keras.models.load_model('ResNet34_best_weight4.hdf5')
y_pr = best_model.predict(X_test)
# Vectorized argmax over the class axis replaces the per-sample Python loop.
y_pred = np.argmax(y_pr, axis=1)

cm = confusion_matrix(y_test, y_pred)
ac = accuracy_score(y_test, y_pred)
print("Accuracy", ac)
print("Confusion matrix")
plot_confusion_matrix(cm, figsize=(9,9), colorbar=True)
Accuracy 0.989520624303233
Confusion matrix
Out[ ]:
(<Figure size 648x648 with 2 Axes>,
 <matplotlib.axes._subplots.AxesSubplot at 0x7f39f0041910>)

Filter and Feature Observation

In [ ]:
# Display one test image and turn it into a single-sample batch that can be
# fed through the activation model.
img = X_test[0]
plt.imshow(img)
plt.grid(False)

x = image.img_to_array(img)
x = x[np.newaxis, ...]  # add batch dimension -> shape (1, H, W, C)
In [ ]:
'''
model = Model(inputs=VGG16_model, outputs=outputs)
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(),
              metrics=['accuracy'])
'''
# Build a model exposing every intermediate layer's output (input layers
# excluded) and run the prepared sample through it once.
layer_outputs = []
for layer in ResNet34_model.layers:
    if not layer.name.startswith('input'):
        layer_outputs.append(layer.output)
activation_model = Model(inputs=ResNet34_model.input, outputs=layer_outputs)
activations = activation_model.predict(x)

def plot_activations(layer_idx, activations):
    """Plot every channel of one layer's activation map as an 8-row grid.

    Parameters
    ----------
    layer_idx : int
        Index into ``activations`` (the activation model's output order).
    activations : list of np.ndarray
        Per-layer activations; each assumed shape (1, H, W, channels) --
        only the first sample is plotted.
    """
    layer_activation = activations[layer_idx][0]
    n_row = 8
    n_column = layer_activation.shape[-1] // n_row

    f, ax = plt.subplots(n_row, n_column)
    for i in range(n_row):
        for j in range(n_column):
            # Copy (as float) so the in-place normalization below does not
            # mutate `activations` through the slice view.
            channel_image = layer_activation[:, :, i * n_column + j].astype('float64')
            # Rescale to mean 128 / std 64 for visibility. Guard against
            # constant (e.g. all-zero) channels, which previously raised
            # divide-by-zero / invalid-value RuntimeWarnings.
            channel_image -= channel_image.mean()
            std = channel_image.std()
            if std > 0:
                channel_image /= std
            channel_image *= 64
            channel_image += 128
            channel_image = np.clip(channel_image, 0, 255).astype('uint8')

            ax[i, j].imshow(channel_image)
            ax[i, j].axis('off')
            ax[i, j].set_xticklabels([])
            ax[i, j].set_yticklabels([])
    plt.subplots_adjust(wspace=0, hspace=0)
    f.set_size_inches(n_column, n_row)
    plt.show()
In [ ]:
# Visualize feature maps from a hand-picked set of early and mid-level layers.
for i in (1, 2, 3, 8, 10):
    print(f'Feature_map with {i} convolution layer')
    plot_activations(layer_idx=i, activations=activations)
Feature_map with 1 convolution layer
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:23: RuntimeWarning: invalid value encountered in true_divide
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:23: RuntimeWarning: divide by zero encountered in true_divide
Feature_map with 2 convolution layer
Feature_map with 3 convolution layer
Feature_map with 8 convolution layer
Feature_map with 10 convolution layer